When converting a p2m entry to a physical address, the address must be
masked with _PAGE_PPN_MASK to strip out the bits that the p2m entry
uses for other purposes.
Signed-off-by: Xiantao Zhang <xiantao.zhang@intel.com>
* which is required by vga acceleration since qemu maps shared
* vram buffer with WB.
*/
- if (mfn_valid(maddr >> PAGE_SHIFT) && phy_pte.ma != VA_MATTR_NATPAGE)
+ if (mfn_valid((maddr & _PAGE_PPN_MASK) >> PAGE_SHIFT)
+ && phy_pte.ma != VA_MATTR_NATPAGE)
phy_pte.ma = VA_MATTR_WB;
maddr = ((maddr & _PAGE_PPN_MASK) & PAGE_MASK) | (paddr & ~PAGE_MASK);
/* in HVM guest, when VTD is enabled,
* P2M entry may change from _PAGE_IO type to real MMIO page
*/
- if(VMX_DOMAIN(d->vcpu[0]) && (pte_val(ret_pte) & _PAGE_IO)) {
+ if(is_hvm_domain(d) && (pte_val(ret_pte) & _PAGE_IO) &&
+ !mfn_valid(physaddr >> PAGE_SHIFT)) {
old_pte = ret_pte;
goto again_hvm_page_io;
}